Testing Keras Model

Here I'll be running tests on the model that I've designed, and improving logs and so forth for the final model.

Below is the code from model.py:


In [1]:
import os, re, random
import sys, argparse, codecs
import itertools as it
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.utils import np_utils


Using TensorFlow backend.

In [2]:
def parse_args():
    '''Parses all keyword arguments for model and returns them.

       Returns:
        - data_dir:   (str) The directory to the text file(s) for training.
        - rnn_size:   (int) The number of cells in each hidden layer in 
                      the network.
        - num_layers: (int) The number of hidden layers in the network.
        - dropout:    (float) Dropout value (between 0, 1 exclusive).'''

    # initialise parser
    parser = argparse.ArgumentParser()

    # add arguments, set default values and expected types
    parser.add_argument("-data_dir",
        help="The directory to the text file(s) for training.")
    parser.add_argument("-seq_length", type=int, default=25,
        help="The length of sequences to be used for training")
    parser.add_argument("-validation_split", type=float, default=0.1,
        help="The proportion of the training data to use for validation")
    parser.add_argument("-batch_size", type=int, default=100,
        help="The number of minibatches to be used for training")
    parser.add_argument("-rnn_size", type=int, default=128,
        help="The number of cells in each hidden layer in the network")
    parser.add_argument("-num_layers", type=int, default=3,
        help="The number of hidden layers in the network")
    parser.add_argument("-dropout", type=float, default=0.1,
        help="Dropout value (between 0, 1 exclusive)")
    parser.add_argument("-epochs", type=int, default=20,
        help="Number of epochs for training")
    parser.add_argument("-tensorboard", type=int, default=1,
        help="Save model statistics to tensorboard")

    # assert args.validation_split < 0.5
    
    # parse arguments and return their values
    args = parser.parse_args()
    return args.data_dir, args.seq_length, args.validation_split, \
           args.batch_size, args.rnn_size, args.num_layers, args.dropout, \
           args.epochs, args.tensorboard

In [3]:
def print_data(text):
    '''Re-encodes text so that it can be printed to command line 
       without raising a UnicodeEncodeError, and then prints it.
       Incompatible characters are simply dropped before printing.

       Args:
       - text: (str) The text to be printed'''

    print(text.encode(sys.stdout.encoding, errors='replace'))

In [4]:
def load_data(data_dir, encoding='utf-8'):
    '''Appends all text files in data_dir into a single string and returns it.
       All files are assumed to be utf-8 encoded, and of type '.txt'.

       Args:
       - data_dir: (str) The directory to text files for training.
       - encoding: (str) The type of encoding to use when decoding each file.

       Returns:
       - text_data: (str) Appended files as a single string.'''

    print("Loading data from %s" % os.path.abspath(data_dir))
    # Initialise text string
    text_data = ''
    # select .txt files from data_dir
    for filename in filter(lambda s: s.endswith(".txt"), os.listdir(data_dir)):
        # open file with default encoding
        print("loading file: %s" % filename)
        filepath = os.path.abspath(os.path.join(data_dir, filename))
        with open(filepath,'r', encoding = encoding) as f:
            text_data += f.read() + "\n"
    return text_data

In [5]:
def process_text(text_data, seq_length):
    '''Preprocesses text_data for RNN model.

       Args:
       - text: (str) text file to be processed.
       - seq_length: (int) length of character sequences to be considered 
                     in the training set.

       Returns:
       - char_to_int: (dict) Maps characters in the character set to ints.
       - int_to_char: (dict) Maps ints to characters in the character set.
       - n_chars: (int) The number of characters in the text.
       - n_vocab: (int) The number of unique characters in the text.'''

    # create mapping of unique chars to integers, and a reverse mapping
    chars = sorted(set(text_data))
    char_to_int = {c: i for i, c in enumerate(chars)}
    int_to_char = {i: c for i, c in enumerate(chars)}

    # summarize the loaded data
    n_chars = len(text_data)
    n_vocab = len(chars)
    
    return char_to_int, int_to_char, n_chars, n_vocab

In [6]:
def get_batch(batch, starts, text_data, seq_length, batch_size, 
              char_to_int, n_vocab):
    '''A generator that returns sequences of length seq_length, in
       batches of size batch_size.
       
       Args:
       - batch: (int) The index of the batch to be returned
       - text_data: (str) The text to feed the model
       - seq_length: (int) The length of each training sequence
       - batch_size: (int) The size of minibatches for training'''
    
    # prepare the dataset of input to output pairs encoded as integers
    dataX = []
    dataY = []
    for start in range(batch_size * batch, batch_size * (batch + 1)): 
        seq_in  = text_data[starts[start]:starts[start] + seq_length]
        seq_out = text_data[starts[start] + seq_length]
        dataX.append([char_to_int[char] for char in seq_in])
        dataY.append(char_to_int[seq_out])
        
    X = np_utils.to_categorical(dataX, num_classes=n_vocab)
    X = X.reshape(batch_size, seq_length, n_vocab)

    # one hot encode the output variable
    y = np_utils.to_categorical(dataY, num_classes=n_vocab)
    
    return X, y

In [7]:
def generate_batches(mode, text_data, seq_length, validation_split,
                     batch_size, char_to_int, n_chars, n_vocab,
                     random_seed=1234, shuffle=True):
    '''A generator that returns training sequences of length seq_length, in
       batches of size batch_size.

       Args:
       - mode: (str) Whether the batch is for training or validation. 
               'validation' or 'train' only
       - text_data: (str) The text for training
       - seq_length: (int) The length of each training sequence
       - batch_size: (int) The size of minibatches for training
       - validation_split: (float) The proportion of batches to use as 
                           validation data
       - random_seed: A random seed'''

    # set random seed
    random.seed(random_seed)
    
    # index the text_data
    starts = list(range(n_chars - n_chars % seq_length - seq_length))
    
    if shuffle:
        # shuffle the indices
        random.shuffle(starts)
    
#     while True:
    
    n_batches = n_chars // batch_size
    validation_size = round(n_batches * validation_split)
    if mode == 'validation':
        for batch in range(validation_size):
            X, y = get_batch(batch, starts, text_data, seq_length, 
                             batch_size, char_to_int, n_vocab)
            yield X, y
            
    elif mode == 'train':
        for batch in range(validation_size, n_batches):
            X, y = get_batch(batch, starts, text_data, seq_length, 
                             batch_size, char_to_int, n_vocab)
            yield X, y
    else:
        raise ValueError("only 'validation' and 'train' modes accepted")

In [8]:
def build_model(batch_size, seq_length, n_vocab, 
                rnn_size, num_layers, drop_prob):
    '''Defines the RNN LSTM model.

       Args:
        - batch_size: (int) The size of each minibatches.
        - seq_length: (int) The length of each sequence for the model.
        - rnn_size: (int) The number of cells in each hidden layer.
        - num_layers: (int) The number of hidden layers in the network.
        - drop_prob: (float) The proportion of cells to drop in each dropout 
                             layer.
       Returns:
        - model: (keras.models.Sequential) The constructed Keras model.'''

    model = Sequential()
    for i in range(num_layers):
        if i == num_layers - 1:
            # add last hidden layer
            model.add(LSTM(rnn_size, return_sequences=False))
        elif i == 0:
            # add first hidden layer
            model.add(LSTM(rnn_size, 
                           batch_input_shape=(None, seq_length, n_vocab),
                           return_sequences=True))
        else:
            # add middle hidden layer
            model.add(LSTM(rnn_size, return_sequences=True))
        
        model.add(Dropout(drop_prob))
    # add output layer
    model.add(Dense(n_vocab, activation='softmax'))

    # compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metric=['accuracy'])  

    return model

    # build and compile Keras model
    model = build_model(batch_size, seq_length, n_vocab,
                        rnn_size, num_layers, drop_prob)

In [9]:
def set_callbacks(verbose, use_tensorboard):
    '''Set callbacks for Keras model.

    Args:
     - use_tensorboard: (int) Add TensorBoard callback if use_tensorboard == 1

    Returns:
     - callbacks: (list) list of callbacks for model'''

    callbacks = [ModelCheckpoint(
                    r'..\checkpoints\weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                    verbose=verbose)]
    if use_tensorboard:
        tb_callback = TensorBoard(log_dir=r'..\logs', histogram_freq=0.01,
                              write_images=True)
        callbacks.append(tb_callback)  

    return callbacks

In [10]:
def fit_model(model, text_data, seq_length, validation_split, epochs, 
              batch_size, char_to_int, n_chars, n_vocab, verbose, use_tensorboard):
    '''Trains the model on the training data.

       Args:
       - model:
       - text_data:
       - seq_length:
       - batch_size:
       - char_to_int:'''
    n_batches = len(text_data) // batch_size
    batch_params = (text_data, seq_length, validation_split,
                     batch_size, char_to_int, n_chars, n_vocab)
    hist = model.fit_generator(
               generator = generate_batches('train', *batch_params),
               validation_data = generate_batches('validation', *batch_params),
               validation_steps = int(n_batches * validation_split),
               epochs = epochs,
               steps_per_epoch = n_batches,
               verbose = verbose,
               callbacks = set_callbacks(verbose, use_tensorboard))
    return hist

In [11]:
def Main():
    '''Executes the model'''

    # load text data to memory
    text_data = load_data(data_dir)

    # preprocess the text - construct character dictionaries etc
    char_to_int, int_to_char, n_chars, n_vocab = \
                                process_text(text_data, seq_length)

    # build and compile Keras model
    model = build_model(batch_size, seq_length, n_vocab,
                        rnn_size, num_layers, drop_prob)

    # fit model using generator
    hist = fit_model(model, text_data, seq_length, validation_split, epochs,
                   batch_size, char_to_int, n_chars, n_vocab, use_tensorboard)

In [12]:
data_dir, seq_length, validation_split, batch_size, rnn_size, \
    num_layers, drop_prob, epochs, verbose, use_tensorboard = \
        (r'..\data', 25, 0.1, 100, 128, 3, 0.1, 10, 1, True)

In [13]:
# load text data to memory
text_data = load_data(data_dir)

# preprocess the text - construct character dictionaries etc
char_to_int, int_to_char, n_chars, n_vocab = process_text(text_data, seq_length)

# build and compile Keras model
model = build_model(batch_size, seq_length, n_vocab,
                    rnn_size, num_layers, drop_prob)

hist = fit_model(model, text_data, seq_length, validation_split, epochs,
                     batch_size, char_to_int, n_chars, n_vocab,  
                     verbose, use_tensorboard)


Loading data from C:\Users\caleb\Documents\Data Science\welcome-to-night-vale\data
loading file: Welcome To Night Vale.txt
C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\keras\backend\tensorflow_backend.py:2250: UserWarning: Expected no kwargs, you passed 1
kwargs passed to function are ignored with Tensorflow backend
  warnings.warn('\n'.join(msg))
INFO:tensorflow:Summary name lstm_1/kernel:0 is illegal; using lstm_1/kernel_0 instead.
INFO:tensorflow:Summary name lstm_1/kernel:0 is illegal; using lstm_1/kernel_0 instead.
INFO:tensorflow:Summary name lstm_1/recurrent_kernel:0 is illegal; using lstm_1/recurrent_kernel_0 instead.
INFO:tensorflow:Summary name lstm_1/recurrent_kernel:0 is illegal; using lstm_1/recurrent_kernel_0 instead.
INFO:tensorflow:Summary name lstm_1/bias:0 is illegal; using lstm_1/bias_0 instead.
INFO:tensorflow:Summary name lstm_1/bias:0 is illegal; using lstm_1/bias_0 instead.
INFO:tensorflow:Summary name lstm_2/kernel:0 is illegal; using lstm_2/kernel_0 instead.
INFO:tensorflow:Summary name lstm_2/kernel:0 is illegal; using lstm_2/kernel_0 instead.
INFO:tensorflow:Summary name lstm_2/recurrent_kernel:0 is illegal; using lstm_2/recurrent_kernel_0 instead.
INFO:tensorflow:Summary name lstm_2/recurrent_kernel:0 is illegal; using lstm_2/recurrent_kernel_0 instead.
INFO:tensorflow:Summary name lstm_2/bias:0 is illegal; using lstm_2/bias_0 instead.
INFO:tensorflow:Summary name lstm_2/bias:0 is illegal; using lstm_2/bias_0 instead.
INFO:tensorflow:Summary name lstm_3/kernel:0 is illegal; using lstm_3/kernel_0 instead.
INFO:tensorflow:Summary name lstm_3/kernel:0 is illegal; using lstm_3/kernel_0 instead.
INFO:tensorflow:Summary name lstm_3/recurrent_kernel:0 is illegal; using lstm_3/recurrent_kernel_0 instead.
INFO:tensorflow:Summary name lstm_3/recurrent_kernel:0 is illegal; using lstm_3/recurrent_kernel_0 instead.
INFO:tensorflow:Summary name lstm_3/bias:0 is illegal; using lstm_3/bias_0 instead.
INFO:tensorflow:Summary name lstm_3/bias:0 is illegal; using lstm_3/bias_0 instead.
INFO:tensorflow:Summary name dense_1/kernel:0 is illegal; using dense_1/kernel_0 instead.
INFO:tensorflow:Summary name dense_1/kernel:0 is illegal; using dense_1/kernel_0 instead.
INFO:tensorflow:Summary name dense_1/bias:0 is illegal; using dense_1/bias_0 instead.
INFO:tensorflow:Summary name dense_1/bias:0 is illegal; using dense_1/bias_0 instead.
Epoch 1/10
    9/16284 [..............................] - ETA: 17029s - loss: 5.2155
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-13-15e8ca6c22c8> in <module>()
     11 hist = fit_model(model, text_data, seq_length, validation_split, epochs,
     12                      batch_size, char_to_int, n_chars, n_vocab,
---> 13                      verbose, use_tensorboard)

<ipython-input-10-780f17a22e57> in fit_model(model, text_data, seq_length, validation_split, epochs, batch_size, char_to_int, n_chars, n_vocab, verbose, use_tensorboard)
     19                steps_per_epoch = n_batches,
     20                verbose = verbose,
---> 21                callbacks = set_callbacks(verbose, use_tensorboard))
     22     return hist

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
     86                 warnings.warn('Update your `' + object_name +
     87                               '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 88             return func(*args, **kwargs)
     89         wrapper._legacy_support_signature = inspect.getargspec(func)
     90         return wrapper

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\keras\models.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_q_size, workers, pickle_safe, initial_epoch)
   1108                                         workers=workers,
   1109                                         pickle_safe=pickle_safe,
-> 1110                                         initial_epoch=initial_epoch)
   1111 
   1112     @interfaces.legacy_generator_methods_support

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
     86                 warnings.warn('Update your `' + object_name +
     87                               '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 88             return func(*args, **kwargs)
     89         wrapper._legacy_support_signature = inspect.getargspec(func)
     90         return wrapper

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_q_size, workers, pickle_safe, initial_epoch)
   1888                     outs = self.train_on_batch(x, y,
   1889                                                sample_weight=sample_weight,
-> 1890                                                class_weight=class_weight)
   1891 
   1892                     if not isinstance(outs, list):

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1631             ins = x + y + sample_weights
   1632         self._make_train_function()
-> 1633         outputs = self.train_function(ins)
   1634         if len(outputs) == 1:
   1635             return outputs[0]

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
   2227         session = get_session()
   2228         updated = session.run(self.outputs + [self.updates_op],
-> 2229                               feed_dict=feed_dict)
   2230         return updated[:len(self.outputs)]
   2231 

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    776     try:
    777       result = self._run(None, fetches, feed_dict, options_ptr,
--> 778                          run_metadata_ptr)
    779       if run_metadata:
    780         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    980     if final_fetches or final_targets:
    981       results = self._do_run(handle, final_targets, final_fetches,
--> 982                              feed_dict_string, options, run_metadata)
    983     else:
    984       results = []

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1030     if handle is None:
   1031       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032                            target_list, options, run_metadata)
   1033     else:
   1034       return self._do_call(_prun_fn, self._session, handle, feed_dict,

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
   1037   def _do_call(self, fn, *args):
   1038     try:
-> 1039       return fn(*args)
   1040     except errors.OpError as e:
   1041       message = compat.as_text(e.message)

C:\Users\caleb\AppData\Local\conda\conda\envs\tensorflow-env\lib\site-packages\tensorflow\python\client\session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1019         return tf_session.TF_Run(session, options,
   1020                                  feed_dict, fetch_list, target_list,
-> 1021                                  status, run_metadata)
   1022 
   1023     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

In [102]:
train_batches = list(generate_batches('train', text_data, seq_length, 
                   validation_split, batch_size, char_to_int, n_chars, 
                   n_vocab, random_seed=1234))

valid_batches = list(generate_batches('validation', text_data, seq_length, 
                   validation_split, batch_size, char_to_int, n_chars, 
                   n_vocab, random_seed=1234))

In [103]:
train_results = []
for inputs, target in train_batches:
    inputs = (inputs + 1) * n_vocab / 2
    inputs = inputs.astype(np.int32)
    
    for batch in range(batch_size):
        seq = inputs[batch, :, :].reshape(seq_length)
        seq_text = ''.join([int_to_char[x] for x in seq])\
                     .replace("\n", " ")
        targ = int_to_char[np.argmax(target, axis=1)[batch]]
        targ = targ.replace("\n", " ")
        train_results.append(seq_text + targ)

In [104]:
valid_results = []
for inputs, target in valid_batches:
    inputs = (inputs + 1) * n_vocab / 2
    inputs = inputs.astype(np.int32)
    
    for batch in range(batch_size):
        seq = inputs[batch, :, :].reshape(seq_length)
        seq_text = ''.join([int_to_char[x] for x in seq])\
                     .replace("\n", " ")
        targ = int_to_char[np.argmax(target, axis=1)[batch]]
        targ = targ.replace("\n", " ")
        valid_results.append(seq_text + targ)

I need to implement the shuffling for the training models.


In [105]:
valid_results[:20]


Out[105]:
[' them at gunpoint – that t',
 'u-  But there    a man-  T',
 'g wares+ the representativ',
 ' dives and loops around+ u',
 ' The only hints can be fou',
 'ther army was marching    ',
 'And then+ there was   + as',
 'l expression for kids- But',
 'l perpetrators saying thin',
 'malevolent spirits+ or tan',
 'ntact lenses you put in th',
 't impossible to look bad i',
 'g a religious holiday<”  T',
 "'like in the chanted blood",
 'filled hugs- Ve had unackn',
 'tly involved in the recove',
 'he said this wasn’t always',
 'on! Play Ball is only a fr',
 ' is over+ dear listeners- ',
 'move along like nothing ha']

In [106]:
train_results[:20]


Out[106]:
['t apart+ toes together- Ri',
 'y…what<…within the cave- A',
 'w what art really is- You ',
 'r word for the discreet bo',
 'ud with a ladder going up ',
 'es- Finer words were never',
 'color of the universe+ and',
 'anding in the center of th',
 's for a lot- That counts f',
 's I do not own yellow galo',
 'ng out in the desert-  “Vh',
 'rtwined in defiance of our',
 'y- Representatives for the',
 ' Rtreet- “You know+ the ki',
 't gave us more time to our',
 'nza+ panache+ elephantitis',
 'OK+ I won’t!   Con’t tell!',
 'o you- Ge is innocent+ and',
 'that history is a myth+ an',
 'sion+ eyes- Ve’ve also rec']

In [212]:
def sample_model(model, sample_length, n_vocab):
    '''Prints out a sequence of text generated from the text data'''
    
    feed_seq = random.sample(range(n_vocab), seq_length)
    print('initialisation:', ''.join([int_to_char[x] for x in feed_seq]))
    feed_seq = [normalize_ints(i, n_vocab) for i in feed_seq]
    for _ in range(sample_length):
        feed_seq = np.reshape(feed_seq, (1, seq_length, 1))
        next_char = model.predict(feed_seq, batch_size=1)
        feed_seq = np.reshape(feed_seq, seq_length)
        feed_seq = list(feed_seq)[1:] + [np.argmax(next_char)]
    
    out_seq = [int_to_char[round(x)] for x in feed_seq]
    print(out_seq)
    print("".join(out_seq).replace("\n", "\\n"))
    print(len(out_seq))

sample_model(model, 25, n_vocab)


initialisation: bbbbbbbbbbbbbbbbbbbbbbbbb
['&', 'Т', '͊', '͊', '͊', '͊', '͊', '͊', '͊', '͊', '͊', '͊', '͊', '͊', '͊', '͊', '͊', ' ', ' ', 'з', 'з', 'з', 'з', 'з', 'з']
&Т͊͊͊͊͊͊͊͊͊͊͊͊͊͊͊  зззззз
25

In [214]:


In [109]:
seq_in = [normalize_ints(i, n_vocab) for i in random.sample(range(n_vocab), 25)]

In [125]:
int_to_char[np.argmax(pred)]


Out[125]:
'Т'

In [110]:
seq_in = np.reshape(seq_in, (1, seq_length, 1))

In [113]:
pred = model.predict(np.reshape(seq_in, (1, seq_length, 1)), batch_size=1)

In [25]:
from collections import Counter

In [31]:
alphabet = sorted(set(text_data))
Counter(text_data).values()


Out[31]:
dict_values([1054, 4, 1, 29, 3, 1, 3296, 1970, 1, 21, 109498, 1, 1391, 5, 1, 1, 64, 19578, 1457, 1933, 79862, 5, 10, 1, 196, 1, 6, 4, 30354, 6, 4, 5, 2, 2, 3, 3, 3, 1, 1, 270480, 28604, 1, 37521, 1642, 3, 5, 5, 834, 106, 4, 160, 16, 1, 2, 119, 2316, 4, 41, 2, 28002, 4, 23, 3702, 4, 6, 10, 1208, 3, 802, 6, 1, 2, 415, 152, 1, 130, 27, 1, 1016, 4, 3, 1, 832, 76484, 19392, 1933, 7, 6, 2867, 788, 97433, 5, 1, 38093, 6804, 4, 1, 1, 11, 4, 6, 1, 2216, 2, 2116, 1, 3, 1378, 298, 49, 2, 1, 5, 4, 97646, 2, 15, 28235, 1, 1, 444, 553, 3, 2, 25274, 4, 832, 6, 2249, 54656, 4, 1, 1, 4, 1539, 1, 622, 3, 2, 3, 6, 1, 6, 106, 76, 3, 1, 5, 560, 79, 4, 1146, 1, 2, 196, 2, 2, 445, 23075, 71653, 3, 104, 4, 1, 4692, 1, 3, 9, 13, 2, 1813, 66583, 1, 3, 2, 2569, 3, 1806, 3105, 80, 2, 4, 1, 1, 17479, 2, 1, 3, 4, 3, 1418, 2, 1, 1840, 7591, 5, 11956, 1, 4, 5, 2, 2, 1, 2, 1417, 2, 4, 11900, 21831, 2, 2, 1, 3, 4, 4, 150348, 86679, 46722, 1, 3, 7, 18, 2, 4, 1])

In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]: